%reload_ext autoreload
%autoreload 2
%matplotlib inline
import fastai as ai
import fastai
from fastai.vision import *
from fastai.callbacks import *
from fastai.vision.gan import *
from PIL import Image, ImageDraw
import shutil
import requests, zipfile, io
import os
from tqdm import tqdm
import dropbox
# Dataset layout under /storage/capstone: originals, grayscale inputs,
# and generator outputs (fakes for the critic).
path = Path("/storage/capstone/")
color = 'color'
grayscale = 'grayscale'
colorized = 'colorized'
path_color = path/color
path_gray = path/grayscale
path_gen = path/colorized
# ensure the input folders exist (the generated folder is created later,
# by save_preds)
for folder in (path_color, path_gray):
    folder.mkdir(parents=True, exist_ok=True)
path.ls()
def download_zip(url, path, fn, chunk_size=128):
    """Stream-download `url` to `path/fn` in `chunk_size`-byte pieces.

    Prints "request failed" and writes nothing if the HTTP status is not OK.
    Fix: the streamed response is now closed via a context manager — the
    original leaked the underlying connection on both success and failure.
    """
    with requests.get(url, stream=True) as r:
        if not r.ok:
            print("request failed")
            return
        with open(path/fn, 'wb') as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                f.write(chunk)
# download 2017 coco data from http://cocodataset.org/#download
#url = "http://images.cocodataset.org/zips/train2017.zip"
#zip_fn = 'coco.zip'
#download_zip(url, path_color, zip_fn, 1024*1024*100)
# unzip file
#with zipfile.ZipFile(path_color/zip_fn) as z:
# z.extractall(path_color)
# move images from extract folder to parent folder
#for fp in (path_color/'train2017').ls():
# shutil.move(fp, path_color/fp.name)
#os.remove(path_color/zip_fn)
def bnw(fn, i, dest):
    """Save a grayscale ('L' mode) copy of image `fn` into `dest/fn.name`.

    `i` is unused; the (fn, i) signature is required by fastai's `parallel`
    worker convention. Corrupt/unreadable images are skipped silently
    (deliberate best-effort behavior for a bulk conversion job).
    Fix: catch `Exception` instead of a bare `except`, which also swallowed
    KeyboardInterrupt/SystemExit.
    """
    try:
        im = Image.open(fn)
        im.verify()
    except Exception:
        return
    # verify() leaves the file handle unusable, so reopen before converting
    im = Image.open(fn)
    im = im.convert('L')
    im.save(dest/fn.name, quality=100)
# save data to storage (color and grayscale copies)
#il = ImageList.from_folder(path_color)
#ai.core.parallel(partial(bnw, dest=path_gray), il.items)
def get_generator_data(bs, size, p=1.):
    """Databunch for the generator: grayscale inputs labeled with their
    paired color originals (matched by file name).

    bs: batch size; size: image side length; p: fraction of data to use.
    """
    def color_target(fn):
        # the color target shares the grayscale file's name
        return path_color/fn.name

    src = (ImageImageList
           .from_folder(path_gray)
           .use_partial_data(p)
           .split_by_rand_pct(0.1)
           .label_from_func(color_target))
    data = (src
            .transform(tfms=get_transforms(), size=size, tfm_y=True)
            .databunch(bs=bs)
            .normalize(imagenet_stats, do_y=True))
    data.c = 3  # generator produces 3-channel (RGB) output
    return data
# used to save predictions from generator
def save_preds(loader, learner, dest):
    """Run `learner` over every batch of `loader` and save each reconstructed
    prediction into `dest`, named after its source file.

    Assumes `loader` iterates the dataset in stored order (e.g. `fix_dl`),
    so predictions line up with `loader.dataset.items`.
    """
    dest.mkdir(parents=True, exist_ok=True)
    source_files = iter(loader.dataset.items)
    for batch in loader:
        for pred in learner.pred_batch(batch=batch, reconstruct=True):
            pred.save(dest/next(source_files).name)
def get_critic_data(classes, bs, size):
    """Databunch for the critic: images from the given class folders under
    `path`, labeled by folder name (real vs generated).
    """
    items = ImageList.from_folder(path, include=classes)
    src = (items
           .split_by_rand_pct(0.1)
           .label_from_folder(classes=classes))
    data = (src
            .transform(tfms=get_transforms(), size=size)
            .databunch(bs=bs)
            .normalize(imagenet_stats))
    # NOTE(review): c=3 mirrors the generator databunch — confirm it is
    # intended here given the critic sees only two classes
    data.c = 3
    return data
# Pretraining hyperparameters
bs = 48                   # batch size for the first (128px) round
size = 128                # image side length for the first round
arch = models.resnet34    # U-Net encoder backbone
loss_gen = MSELossFlat()  # pixel-wise regression loss for generator pretraining
wd = 1e-3                 # weight decay shared by all learners
def get_generator(data_gen):
    """Build the colorizer: a U-Net learner on the resnet34 backbone with
    blur upsampling, weight norm, and self-attention.
    """
    learner = unet_learner(
        data_gen,
        arch,
        loss_func=loss_gen,
        wd=wd,
        blur=True,
        norm_type=NormType.Weight,
        self_attention=True,
    )
    return learner
# Critic loss: binary cross-entropy with logits, wrapped by fastai's
# AdaptiveLoss to fit the critic's output shape
loss_crit = AdaptiveLoss(nn.BCEWithLogitsLoss())
def get_critic(data_crit, metrics):
    """Build the real-vs-fake critic using fastai's gan_critic architecture."""
    critic_learner = Learner(
        data_crit,
        gan_critic(),
        metrics=metrics,
        loss_func=loss_crit,
        wd=wd,
    )
    return critic_learner
# --- Pretrain the generator (supervised grayscale -> color regression) ---
data_gen = get_generator_data(bs, size)
data_gen.show_batch(ds_type=DatasetType.Train, rows=1)
generator = get_generator(data_gen)
# train the head first, then unfreeze the backbone and fine-tune
generator.fit_one_cycle(3, 1e-3)
generator.unfreeze()
generator.lr_find()
generator.recorder.plot()
generator.fit_one_cycle(6, slice(1e-6,1e-4))
generator.show_results(rows=2)
generator.save('gen-pre')
# dump the pretrained generator's outputs into the 'colorized' folder —
# these become the fake class for critic pretraining
save_preds(data_gen.fix_dl, generator, path_gen)
# drop the generator to free GPU memory before building the critic
generator = None
gc.collect()
# --- Pretrain the critic (generated 'colorized' vs real 'color') ---
data_crit = get_critic_data([colorized, color], bs=bs, size=size)
data_crit.show_batch(ds_type=DatasetType.Train, rows=2)
critic = get_critic(data_crit, accuracy_thresh_expand)
critic.fit_one_cycle(6, 1e-3)
critic.save('critic-pre')
critic = None
gc.collect()
def refresh_gan(version, crit_thresh=0.65, loss_weights=(1.,50.), bs=48, size=128, p=1.):
    """Rebuild the GAN learner from scratch and resume from saved weights.

    version: 'pre' to start from the separately pretrained generator/critic
        checkpoints, otherwise the name of a saved GAN checkpoint to load.
    crit_thresh: critic accuracy threshold for the adaptive switcher.
    loss_weights: (critic, pixel) weighting of the generator loss.
    bs/size/p: batch size, image side length, and dataset fraction —
        rebuilt each call to support progressive resizing.

    Fix: the original duplicated the entire GANLearner.from_learners(...)
    call in both branches; it is now built once and conditionally loaded.

    NOTE(review): the critic data here uses [grayscale, color] folders,
    while critic pretraining used [colorized, color] — confirm intentional.
    """
    data_gen = get_generator_data(bs, size, p)
    data_crit = get_critic_data([grayscale, color], bs=bs, size=size)
    generator = get_generator(data_gen)
    critic = get_critic(data_crit, metrics=None)
    switcher = partial(AdaptiveGANSwitcher, critic_thresh=crit_thresh)
    if version == 'pre':
        # first GAN round: seed from the pretrained sub-learners
        generator.load('gen-pre')
        critic.load('critic-pre')
    learn = GANLearner.from_learners(generator, critic, weights_gen=loss_weights,
                                     show_img=True, switcher=switcher,
                                     opt_func=optim.Adam, wd=wd)
    if version != 'pre':
        learn = learn.load(version)
    return learn
# base learning rate for GAN training (halved/quartered at larger sizes)
lr = 1e-4
# --- GAN round 1: 128px, full dataset, 10 epochs total ---
learn = refresh_gan('pre')
gc.collect()
learn.fit(5, lr)
learn.save('gan-128-5')
learn = refresh_gan('gan-128-5')
gc.collect()
learn.fit(5, lr)
learn.save('gan-128-10')
# --- Progressive resize: 192px round (batch shrunk to 2/3 to fit memory) ---
learn = refresh_gan('gan-128-10', bs=bs*2//3, size=192)
gc.collect()
learn.fit(2, lr)
learn.save('gan-192-2')
learn = refresh_gan('gan-192-2', bs=bs*2//3, size=192)
gc.collect()
learn.fit(3, lr)
learn.save('gan-192-5')
learn = refresh_gan('gan-192-5', bs=bs*2//3, size=192)
gc.collect()
learn.fit(2, lr)
learn.save('gan-192-7')
# --- Progressive resize: 256px round at half lr, one epoch at a time ---
# NOTE(review): resumes from 'gan-192-5', discarding the later 'gan-192-7'
# checkpoint — confirm intentional
learn = refresh_gan('gan-192-5', bs=bs//3, size=256)
gc.collect()
learn.fit(1, lr/2)
learn.save('gan-256-1')
learn = refresh_gan('gan-256-1', bs=bs//3, size=256)
gc.collect()
learn.fit(1, lr/2)
learn.save('gan-256-2')
learn = refresh_gan('gan-256-2', bs=bs//3, size=256)
gc.collect()
learn.fit(1, lr/2)
learn.save('gan-256-3')
learn = refresh_gan('gan-256-3', bs=bs//3, size=256)
gc.collect()
learn.fit(1, lr/2)
learn.save('gan-256-4')
learn = refresh_gan('gan-256-4', bs=bs//3, size=256)
gc.collect()
learn.fit(1, lr/2)
learn.save('gan-256-5')
# --- Progressive resize: 320px round at lr/4, then lr/8; from checkpoint 2
# onward only 80% of the data is used per epoch (p=0.8) ---
learn = refresh_gan('gan-256-5', bs=bs//5, size=320)
gc.collect()
learn.fit(1, lr/4)
learn.save('gan-320-1')
learn = refresh_gan('gan-320-1', bs=bs//6, size=320, p=0.8)
gc.collect()
learn.fit(1, lr/4)
learn.save('gan-320-2')
learn = refresh_gan('gan-320-2', bs=bs//6, size=320, p=0.8)
gc.collect()
learn.fit(1, lr/4)
learn.save('gan-320-3')
learn = refresh_gan('gan-320-3', bs=bs//6, size=320, p=0.8)
gc.collect()
learn.fit(1, lr/4)
learn.save('gan-320-4')
learn = refresh_gan('gan-320-4', bs=bs//6, size=320, p=0.8)
gc.collect()
learn.fit(1, lr/4)
learn.save('gan-320-5')
# lr dropped to lr/8 for the next five epochs
learn = refresh_gan('gan-320-5', bs=bs//6, size=320, p=0.8)
gc.collect()
learn.fit(1, lr/8)
learn.save('gan-320-6')
learn = refresh_gan('gan-320-6', bs=bs//6, size=320, p=0.8)
gc.collect()
learn.fit(1, lr/8)
learn.save('gan-320-7')
learn = refresh_gan('gan-320-7', bs=bs//6, size=320, p=0.8)
gc.collect()
learn.fit(1, lr/8)
learn.save('gan-320-8')
learn = refresh_gan('gan-320-8', bs=bs//6, size=320, p=0.8)
gc.collect()
learn.fit(1, lr/8)
learn.save('gan-320-9')
learn = refresh_gan('gan-320-9', bs=bs//6, size=320, p=0.8)
gc.collect()
learn.fit(1, lr/8)
learn.save('gan-320-10')
# --- 320px continued with lr restored to the full base rate ---
learn = refresh_gan('gan-320-10', bs=bs//6, size=320, p=0.8)
gc.collect()
learn.fit(1, lr)
learn.save('gan-320-11')
learn = refresh_gan('gan-320-11', bs=bs//6, size=320, p=0.8)
gc.collect()
learn.fit(1, lr)
learn.save('gan-320-12')
learn = refresh_gan('gan-320-12', bs=bs//6, size=320, p=0.8)
gc.collect()
learn.fit(1, lr)
learn.save('gan-320-13')
learn = refresh_gan('gan-320-13', bs=bs//6, size=320, p=0.8)
gc.collect()
learn.fit(1, lr)
learn.save('gan-320-14')
learn = refresh_gan('gan-320-14', bs=bs//6, size=320, p=0.8)
gc.collect()
learn.fit(1, lr)
learn.save('gan-320-15')
learn = refresh_gan('gan-320-15', bs=bs//6, size=320, p=0.8)
gc.collect()
learn.fit(1, lr)
learn.save('gan-320-16')
learn = refresh_gan('gan-320-16', bs=bs//6, size=320, p=0.8)
gc.collect()
learn.fit(1, lr)
learn.save('gan-320-17') # good results here
learn = refresh_gan('gan-320-17', bs=bs//6, size=320, p=0.8)
gc.collect()
learn.fit(1, lr)
learn.save('gan-320-18')
learn = refresh_gan('gan-320-18', bs=bs//6, size=320, p=0.8)
gc.collect()
learn.fit(1, lr)
learn.save('gan-320-19')
learn = refresh_gan('gan-320-19', bs=bs//6, size=320, p=0.8)
gc.collect()
learn.fit(1, lr)
learn.save('gan-320-20') #good results
learn = refresh_gan('gan-320-20', bs=bs//6, size=320, p=0.8)
gc.collect()
learn.fit(1, lr)
learn.save('gan-320-21')
# one 3-epoch burst on a smaller 40% subset, then back to p=0.8
learn = refresh_gan('gan-320-21', bs=bs//6, size=320, p=0.4)
gc.collect()
learn.fit(3, lr)
learn.save('gan-320-22')
learn = refresh_gan('gan-320-22', bs=bs//6, size=320, p=0.8)
gc.collect()
learn.fit(1, lr)
learn.save('gan-320-23')
learn = refresh_gan('gan-320-23', bs=bs//6, size=320, p=0.8)
gc.collect()
learn.fit(1, lr)
learn.save('gan-320-24') # pretty decent
learn.show_results(rows=6)
# small-batch refresh on a 20% subset, just for qualitative inspection
learn = refresh_gan('gan-320-24', bs=6, size=320, p=0.2)
i = 20
open_image(path_color.ls()[i])
open_image(path_gray.ls()[i])
gray_img = open_image(path_gray.ls()[i])
learn.predict(gray_img)[0]
# rebuild at training settings before saving/exporting the final model
learn = refresh_gan('gan-320-24', bs=bs//6, size=320, p=0.8)
save_path = Path('/storage/capstone/colorizer')
# checkpoint (.pth) with optimizer state, for resuming training
fp = learn.save(save_path, return_path=True, with_opt=True) # pretty decent
fp = str(fp)
# export model
learn.export('colorizer.pkl')
# NOTE(review): fp is rebound here to the exported .pkl path, so later
# upload code sends the .pkl, not the .pth checkpoint — confirm intended
fp = learn.path/'colorizer.pkl'
# modified solution found at: https://stackoverflow.com/questions/37397966/dropbox-api-v2-upload-large-files-using-python
def upload(access_token, file_path, target_path, timeout=900, chunk_size=4*1024*1024):
    """Upload `file_path` to Dropbox at `target_path`.

    Files at or below `chunk_size` go up in a single `files_upload` call;
    larger files use an upload session in `chunk_size` pieces with a tqdm
    progress bar. The Dropbox API responses are printed for the single-shot
    and final-commit calls.

    Fixes: the progress bar is now updated for the final chunk (it
    previously stopped short of 100%), and the deprecated
    `files_upload_session_append` is replaced by
    `files_upload_session_append_v2`, which takes the cursor directly.
    """
    dbx = dropbox.Dropbox(access_token, timeout=timeout)
    file_size = os.path.getsize(file_path)
    with open(file_path, "rb") as f:
        if file_size <= chunk_size:
            print(dbx.files_upload(f.read(), target_path))
            return
        with tqdm(total=file_size, desc="Uploaded") as pbar:
            start_result = dbx.files_upload_session_start(f.read(chunk_size))
            pbar.update(chunk_size)
            cursor = dropbox.files.UploadSessionCursor(
                session_id=start_result.session_id, offset=f.tell())
            commit = dropbox.files.CommitInfo(path=target_path)
            while f.tell() < file_size:
                remaining = file_size - f.tell()
                if remaining <= chunk_size:
                    # last piece: finish the session and commit the file
                    print(dbx.files_upload_session_finish(
                        f.read(chunk_size), cursor, commit))
                    pbar.update(remaining)
                else:
                    dbx.files_upload_session_append_v2(f.read(chunk_size), cursor)
                    # the cursor offset must track how much has been sent
                    cursor.offset = f.tell()
                    pbar.update(chunk_size)
# SECURITY: a Dropbox access token was previously hard-coded here — a leaked
# credential; revoke it in the Dropbox app console. Read it from the
# environment instead.
key = os.environ.get("DROPBOX_ACCESS_TOKEN", "")
db_path = "/Apps/ImgBuff/colorizer.pth"
# NOTE(review): fp was last rebound to the exported .pkl, yet it is uploaded
# under a .pth name — confirm which artifact should land in Dropbox
upload(key, fp, db_path)
# Upload a sample grayscale/color pair alongside the model.
gray = '/storage/capstone/grayscale/000000354235.jpg'
# NOTE(review): this rebinds `color`, shadowing the folder-name constant
# defined near the top of the file — harmless here at the end of the script
color = '/storage/capstone/color/000000354235.jpg'
# SECURITY: token was hard-coded (leaked credential); read from environment
key = os.environ.get("DROPBOX_ACCESS_TOKEN", "")
db_path = "/Apps/ImgBuff/grayscale/000000354235.jpg"
upload(key, gray, db_path)
db_path = "/Apps/ImgBuff/color/000000354235.jpg"
upload(key, color, db_path)